# The goals / steps of this project are the following:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
%matplotlib inline
# --- Camera calibration ----------------------------------------------------
# prepare object points for a 9x6 inner-corner chessboard,
# like (0,0,0), (1,0,0), (2,0,0) ...., (8,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
plt.figure(figsize=(18, 10))
i = 0
# Step through the list and search for chessboard corners
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # Find the chessboard corners
    ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
    # If found, add object points, image points
    if ret == True:
        objpoints.append(objp)
        imgpoints.append(corners)
        # Draw and display the corners
        img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
        i += 1
        plt.subplot(4,5,i)
        plt.imshow(img)
plt.show()
# NOTE(review): `gray` is whatever image the loop processed last; this assumes
# all calibration images share its dimensions -- confirm.
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
# --- Visual check: undistort every calibration image -----------------------
plt.figure(figsize=(18, 10))
i = 0
# Step through the calibration images and show the distortion-corrected result
for fname in images:
    img = cv2.imread(fname)
    # apply the correction computed by calibrateCamera above
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    i += 1
    plt.subplot(4,5,i)
    plt.imshow(undist)
plt.show()
# --- Perspective-transform setup -------------------------------------------
straight_lines_images = glob.glob('test_images/straight_lines*.jpg')
bottom_y = 720   # y of the bottom edge of the frame (pixels)
top_y = 450      # y where the source trapezoid is cut off near the horizon
plt_width = 15   # width (inches) of matplotlib figures below
# NOTE(review): `fname` is left over from the previous loop; only the image
# dimensions are used here, so any same-sized frame would do -- confirm.
img = cv2.imread(fname)
h,w,_ = img.shape
# Source trapezoid: hand-picked corners around a straight lane section
tl = (598, top_y)
tr = (686, top_y)
bl = (230, bottom_y)
br = (1093, bottom_y)
trap = np.float32([tl, tr, br, bl])
rect_w = br[0] - bl[0]   # lane width in pixels at the bottom of the frame
print(rect_w)
rect_h = h               # the warped view keeps the full frame height
# horizontal offset of the lane centre from the image centre (pixels)
off_centre = ((br[0] + bl[0])-w)/2
# Destination rectangle: full height, rect_w wide, shifted by off_centre
tl = ((w-rect_w)/2+off_centre, 0)
tr = (w-tl[0]+2*off_centre, 0)
bl = (tl[0], rect_h)
br = (tr[0], rect_h)
rect = np.float32([tl, tr, br, bl])
# M maps the road-plane trapezoid to a top-down ("bird's eye") rectangle
M = cv2.getPerspectiveTransform(trap, rect)
# --- Visualise the warp on the straight-line test images -------------------
for fname in straight_lines_images:
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, mtx, dist, None, mtx)
    plt.figure(figsize=(plt_width, int(plt_width/w*h)))
    plt.imshow(img)
    # overlay the source trapezoid on the camera view
    x = [a[0] for a in trap]
    y = [a[1] for a in trap]
    plt.fill(x, y, 'g', alpha=0.5)
    print(rect_h)
    img = cv2.warpPerspective(img, M, (w, rect_h))
    print(img.shape)
    plt.figure()
    plt.imshow(img)
    # overlay the destination rectangle on the warped (top-down) view
    x = [a[0] for a in rect]
    y = [a[1] for a in rect]
    plt.fill(x, y, 'g', alpha=0.5)
    plt.gca().set_ylim(rect_h, 0)
plt.show()
# --- Undistorted vs warped view for every test image -----------------------
test_images = glob.glob('test_images/test*.jpg')
plt.figure(figsize=(10, 20))
i = 0;
for fname in test_images:
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, mtx, dist, None, mtx)
    # left column: undistorted camera view
    i += 1
    plt.subplot(len(test_images), 2, i)
    plt.imshow(img)
    # right column: the same frame after the perspective warp
    img = cv2.warpPerspective(img, M, (w, rect_h))
    i += 1
    plt.subplot(len(test_images), 2, i)
    plt.imshow(img)
plt.show()
# Define a function that takes an image, gradient orientation,
# and threshold min / max values.
def abs_sobel_thresh(img, orient='x', thresh=(0, 255), sobel_kernel=3):
    """Binary mask of pixels whose absolute Sobel gradient is inside `thresh`.

    Parameters
    ----------
    img : RGB image.
    orient : 'x' or 'y' -- axis along which the derivative is taken.
    thresh : (min, max) inclusive bounds on the rescaled (0-255) gradient.
    sobel_kernel : odd Sobel aperture size.

    Raises
    ------
    ValueError for any other `orient` value (the original code fell
    through to a NameError in that case).
    """
    thresh_min = thresh[0]
    thresh_max = thresh[1]
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Apply x or y gradient with the OpenCV Sobel() function
    # and take the absolute value
    if orient == 'x':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
    elif orient == 'y':
        abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
    else:
        raise ValueError("orient must be 'x' or 'y', got {!r}".format(orient))
    # Rescale back to 8 bit integer; guard against a flat image whose
    # maximum gradient is zero (the original divided by it, yielding NaNs)
    max_grad = np.max(abs_sobel)
    if max_grad > 0:
        scaled_sobel = np.uint8(255*abs_sobel/max_grad)
    else:
        scaled_sobel = np.zeros_like(abs_sobel).astype(np.uint8)
    # Create a copy and apply the threshold
    binary_output = np.zeros_like(scaled_sobel)
    # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
    binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
    # Return the result
    return binary_output
# Define a function to return the magnitude of the gradient
# for a given sobel kernel size and threshold values
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose Sobel gradient magnitude is inside mag_thresh.

    Parameters
    ----------
    img : RGB image.
    sobel_kernel : odd Sobel aperture size.
    mag_thresh : (low, high) inclusive bounds on the rescaled (0-255) magnitude.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Take both Sobel x and y gradients
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the gradient magnitude
    gradmag = np.sqrt(sobelx**2 + sobely**2)
    # Rescale to 8 bit; guard against a flat image (max magnitude 0), which
    # the original code divided by, producing NaNs
    max_mag = np.max(gradmag)
    if max_mag > 0:
        gradmag = (gradmag/(max_mag/255)).astype(np.uint8)
    else:
        gradmag = np.zeros_like(gradmag).astype(np.uint8)
    # Create a binary image of ones where threshold is met, zeros otherwise
    binary_output = np.zeros_like(gradmag)
    binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
    # Return the binary image
    return binary_output
# Select pixels by gradient direction for a given range and Sobel kernel
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi/2)):
    """Binary mask of pixels whose gradient direction lies inside `thresh`.

    The direction is arctan2(|dy|, |dx|), so values fall in [0, pi/2].
    """
    low, high = thresh
    # Grayscale, then x and y Sobel gradients
    grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(grey, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(grey, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Absolute gradient direction per pixel
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    # 1.0 where the direction falls inside the inclusive band, else 0.0
    selected = (direction >= low) & (direction <= high)
    return selected.astype(direction.dtype)
# Select pixels by saturation: threshold the S channel of the HLS colour space
def hls_select(img, thresh=(0, 255)):
    """Binary mask where the HLS saturation channel is in (thresh[0], thresh[1]]."""
    low, high = thresh
    # channel 2 of HLS is saturation
    saturation = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    mask = np.zeros_like(saturation)
    mask[(saturation > low) & (saturation <= high)] = 1
    return mask
# Define a function that thresholds the red channel of an RGB image
def red_select(img, thresh=(0, 255)):
    """Binary mask where the red channel of *img* is in (thresh[0], thresh[1]].

    `img` is expected in RGB order -- every caller in this file converts
    with cv2.COLOR_BGR2RGB before thresholding, and hls_select alongside
    uses COLOR_RGB2HLS.  The red channel is therefore index 0; the
    original code read index 2, which is the *blue* channel of an RGB
    image and defeats the point of a red threshold (yellow lane lines
    are bright in red but dark in blue).
    """
    r_channel = img[:,:,0]
    binary_output = np.zeros_like(r_channel)
    binary_output[(r_channel > thresh[0]) & (r_channel <= thresh[1])] = 1
    return binary_output
# --- Try every thresholding function on the warped test images -------------
# Choose a Sobel kernel size
ksize = 3 # Choose a larger odd number to smooth gradient measurements
test_images = glob.glob('test_images/test*.jpg')
plt.figure(figsize=(20, 20))
i = 0;
for fname in test_images:
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.undistort(img, mtx, dist, None, mtx)
    img = cv2.warpPerspective(img, M, (w, rect_h))
    # Apply each of the thresholding functions
    gradx = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 255))
    grady = abs_sobel_thresh(img, orient='y', sobel_kernel=ksize, thresh=(10, 255))
    mag_binary = mag_thresh(img, sobel_kernel=ksize, mag_thresh=(5, 255))
    dir_binary = dir_threshold(img, sobel_kernel=ksize, thresh=(0, np.pi/5))
    s_binary = hls_select(img, thresh=(140, 255))
    r_binary = red_select(img, thresh=(200, 255))
    # one row of 7 panels per test image
    total_plot = 7
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(gradx, cmap='gray')
    plt.title("gradx threshold")
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(grady, cmap='gray')
    plt.title("grady threshold")
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(mag_binary, cmap='gray')
    plt.title("mag threshold")
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(dir_binary, cmap='gray')
    plt.title("dir threshold")
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(s_binary, cmap='gray')
    plt.title("s-channel threshold")
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(r_binary, cmap='gray')
    plt.title("red-channel threshold")
    # only gradx, the S channel and the red channel make it into the final
    # combination; grady/mag/dir are plotted above for comparison only
    combined = np.zeros_like(dir_binary)
    combined[(gradx == 1) | (s_binary == 1) | (r_binary == 1)] = 1
    i += 1
    plt.subplot(len(test_images), total_plot, i)
    plt.imshow(combined, cmap='gray')
    plt.title("combined")
plt.show()
# combine the thresholds that proved useful during the exploration above
def color_and_gradient_thresholding(img):
    """Binary lane mask: union of x-gradient, S-channel and red-channel thresholds.

    Uses the module-level `ksize` Sobel aperture; threshold bounds match
    the hand-tuned values used on the test images.
    """
    gradient_x = abs_sobel_thresh(img, orient='x', sobel_kernel=ksize, thresh=(20, 255))
    saturation_mask = hls_select(img, thresh=(140, 255))
    red_mask = red_select(img, thresh=(200, 255))
    lane_mask = np.zeros_like(saturation_mask)
    lane_mask[(gradient_x == 1) | (saturation_mask == 1) | (red_mask == 1)] = 1
    return lane_mask
# find pixels relating to left and right lane lines given an image
def sliding_window_search(binary_warped, plot=False):
    """Locate left/right lane-line pixels in a warped binary image.

    A column histogram of the bottom half seeds one window per lane; the
    windows then walk up the image, re-centring on the mean x of the
    pixels they contain whenever more than `minpix` pixels are found.

    Parameters
    ----------
    binary_warped : 2-D 0/1 image in the bird's-eye view.
    plot : when True, also build a debug visualisation image.

    Returns (leftx, lefty, rightx, righty, out_img); out_img is None
    unless plot=True.  Side effect: stores the raw pixel coordinates on
    the module-level `left_line` / `right_line` objects.

    Fix: the original used np.int, which was removed from NumPy (1.24+)
    and raises AttributeError there; plain int() is equivalent here.
    """
    # Scan bottom half of image to determine a starting point for each window
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    out_img = None
    if plot:
        # Create an output image to draw on and visualize the result
        out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    # assume mid point is right down the middle of the image
    midpoint = int(histogram.shape[0]/2)
    # find the max indices in the left and right halves of the histogram
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # from the base position, find lane lines for each window
    # Choose the number of sliding windows
    nwindows = 9
    window_height = int(binary_warped.shape[0]/nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Set the width of the windows +/- margin
    margin = 100
    # Set minimum number of pixels found to recenter window
    minpix = 50
    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        if plot:
            # Draw the windows on the visualization image
            cv2.rectangle(out_img, (win_xleft_low,win_y_low), (win_xleft_high,win_y_high), (0,255,0), 4)
            cv2.rectangle(out_img, (win_xright_low,win_y_low), (win_xright_high,win_y_high), (0,255,0), 4)
        # Identify the nonzero pixels in x and y within each window
        good_left_inds = ((nonzeroy >= win_y_low) &
                          (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) &
                          (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) &
                           (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) &
                           (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Concatenate the per-window index arrays
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # update the module-level line objects
    left_line.allx = leftx
    left_line.ally = lefty
    right_line.allx = rightx
    right_line.ally = righty
    if plot:
        # colour the left-line pixels red and the right-line pixels blue
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    return leftx, lefty, rightx, righty, out_img
# Define a class to receive the characteristics of each line detection
class Line():
    """Rolling history of second-order polynomial fits for one lane line.

    Fits are computed and stored in *metres* (see fit()); evaluate() and
    get_curvature() convert between pixels and metres as needed.
    """
    # Define conversions in x and y from pixels space to meters
    ym_per_pix = 30/720 # meters per pixel in y dimension
    # depends on the module-level rect_w computed in the perspective setup
    xm_per_pix = 3.7/rect_w # meters per pixel in x dimension
    # number of past fits kept in the rolling history
    n = 10
    # half-width (pixels) of the search band drawn around the fitted line
    margin = 150
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # coefficient arrays of the last n fits of the line
        self.recent_fitted = []
        # polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        # radius of curvature of the line (not updated by this class's methods)
        self.radius_of_curvature = None
        # distance in meters of vehicle center from the line (not updated here)
        self.line_base_pos = None
        # difference in fit coefficients between last and new fits (not updated here)
        self.diffs = np.array([0,0,0], dtype='float')
    def update(self, detected, current_fit):
        """Push a new fit (detected=True) or age the history (detected=False)."""
        if detected:
            self.detected = True
            self.recent_fitted.append(current_fit)
            # cap the history at n entries
            if len(self.recent_fitted) > self.n:
                self.recent_fitted.pop(0)
        else:
            self.detected = False
            # on a miss, drop the oldest fit so stale data decays over time
            if len(self.recent_fitted):
                self.recent_fitted.pop(0)
        self.update_best_fit()
    def fit(self, x, y):
        """Fit x = f(y) as a 2nd-order polynomial in metres and record it."""
        if len(x) and len(y):
            current_fit = np.polyfit(y*self.ym_per_pix, x*self.xm_per_pix, 2)
            self.update(True, current_fit)
    def pop(self):
        """Discard the most recent fit (used when a sanity check fails)."""
        if len(self.recent_fitted):
            self.recent_fitted.pop()
        self.update_best_fit()
    def update_best_fit(self):
        """Recompute best_fit as the element-wise mean of the history (or None)."""
        if len(self.recent_fitted):
            self.best_fit = sum(self.recent_fitted)/len(self.recent_fitted)
        else:
            self.best_fit = None
    def evaluate(self, y):
        """Return the x pixel position(s) of the averaged fit at pixel row(s) y.

        Returns -1 when no fit is available.
        """
        if self.best_fit is not None:
            # fit is in metres: convert y in, evaluate, convert x back to pixels
            x = (self.best_fit[0]*(y*self.ym_per_pix)**2 + self.best_fit[1]*(y*self.ym_per_pix) + self.best_fit[2])/self.xm_per_pix
        else:
            x = -1
        return x
    def get_curvature(self, y):
        """Radius of curvature (metres) of the averaged fit at pixel row y.

        Standard formula ((1 + (2Ay + B)^2)^1.5) / |2A| with y in metres.
        Returns -1 when no fit is available.
        """
        if self.best_fit is not None:
            c = ((1+(2*self.best_fit[0]*y*self.ym_per_pix+self.best_fit[1])**2)**1.5)/np.absolute(2*self.best_fit[0])
        else:
            c = -1
        return c
    def get_mask(self, w, h):
        """Binary mask of shape (w, h) covering +/-margin pixels around the fit.

        NOTE(review): callers pass (rows, cols) for (w, h), so the mask
        shape matches the warped image, but polyy then spans 0..h (the
        column count) rather than the row count -- looks suspicious,
        confirm intent.  Returns None when there is no fit history.
        """
        if len(self.recent_fitted):
            # Generate x and y values for plotting (np.linspace default: 50 samples)
            polyy = np.linspace(0, h)
            polyx = self.evaluate(polyy)
            # Recast the x and y points into usable format for cv2.fillPoly()
            pts_left = np.array([np.transpose(np.vstack([polyx-self.margin, polyy]))])
            pts_right = np.array([np.flipud(np.transpose(np.vstack([polyx+self.margin, polyy])))])
            pts = np.hstack((pts_left, pts_right))
            mask = np.zeros((w, h), dtype=np.uint8)
            cv2.fillPoly(mask, np.int_(pts), 1)
            return mask
        else:
            return None
def sanity_check(left_line, right_line):
    """Validate the latest pair of lane fits; return True when plausible.

    Rejects the pair when either curvature is implausibly tight, when the
    two fits are not roughly parallel, or when their mean horizontal
    separation strays too far from the calibrated lane width (rect_w).
    """
    # curvature check, evaluated at mid-frame height
    y = 360
    left_curv = left_line.get_curvature(y)
    right_curv = right_line.get_curvature(y)
    # minimum believable radius of curvature (empirically tuned)
    cur_min = 195.986
    if left_curv < cur_min or right_curv < cur_min:
        print("curvature failed", left_curv, right_curv)
        return False
    # parallelism check: sample both fits down the frame (linspace default: 50 rows)
    ploty = np.linspace(0, 719)
    xdiff = right_line.evaluate(ploty) - left_line.evaluate(ploty)
    spread = np.std(xdiff)/np.mean(xdiff)
    if spread > 0.06:
        print("parallel failed", spread)
        return False
    # separation check: mean gap must stay within 20% of the lane width
    if abs(np.mean(xdiff) - rect_w)/rect_w > 0.2:
        print("separation failed")
        return False
    return True
left_masked = None
def pipeline(img, plot=False):
    """Full lane-finding pipeline for one BGR video frame.

    Undistorts, thresholds, warps to a top-down view, finds the lane
    lines (fast masked search around the previous fits when possible,
    full sliding-window search otherwise), sanity-checks the fits, then
    projects the lane area plus curvature/deviation text back onto the
    frame.  Returns the annotated image.

    Relies on module-level state: mtx/dist, M, w, rect_h, ksize,
    left_line, right_line.
    """
    global left_line, right_line,left_masked
    # camera calibration
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    undist = cv2.cvtColor(undist, cv2.COLOR_BGR2RGB)
    # color and gradient thresholding
    combined_binary = color_and_gradient_thresholding(undist)
    # Plotting thresholded images
    if plot:
        f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20,10))
        ax1.set_title('Combined S channel and gradient thresholds')
        ax1.imshow(combined_binary, cmap='gray')
    # perspective warp the image to have top down view
    # (w+70 leaves some extra horizontal room in the warped frame)
    binary_warped = cv2.warpPerspective(combined_binary, M, (w+70, rect_h))
    # if we have identified lane lines in previous frames, we can just use
    # points from an area around the previous lines to find the new lines
    left_mask = left_line.get_mask(binary_warped.shape[0], binary_warped.shape[1])
    right_mask = right_line.get_mask(binary_warped.shape[0], binary_warped.shape[1])
    run_sliding_window_search = False
    if left_mask is not None and right_mask is not None and not plot:
        # keep only binary pixels inside the band around each previous fit
        left_masked = np.zeros_like(binary_warped)
        left_masked[(left_mask == 1) & (binary_warped == 1)] = 1
        right_masked = np.zeros_like(binary_warped)
        right_masked[(right_mask == 1) & (binary_warped == 1)] = 1
        leftx = left_masked.nonzero()[1]
        lefty = left_masked.nonzero()[0]
        rightx = right_masked.nonzero()[1]
        righty = right_masked.nonzero()[0]
        if plot:
            # NOTE(review): unreachable -- this branch requires `not plot`
            out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
            out_img[left_mask.nonzero()[0], left_mask.nonzero()[1]] = [0, 255, 0]
            out_img[right_mask.nonzero()[0], right_mask.nonzero()[1]] = [0, 255, 0]
            out_img[binary_warped.nonzero()[0], binary_warped.nonzero()[1]] = [255, 255, 255]
            out_img[lefty, leftx] = [255, 0, 0]
            out_img[righty, rightx] = [0, 0, 255]
        # Fit a second order polynomial to each line
        left_line.fit(leftx, lefty)
        right_line.fit(rightx, righty)
        if sanity_check(left_line, right_line) is False:
            # reject the fits and fall back to a fresh sliding-window search
            for line in left_line, right_line:
                line.pop() # pop last result
            cv2.putText(undist, "Masked search failed", (200, 150), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
            run_sliding_window_search = True
        else:
            cv2.putText(undist, "Masked search succeeded", (200, 150), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
    else:
        run_sliding_window_search = True
    if run_sliding_window_search:
        # do a slow sliding window search
        leftx, lefty, rightx, righty, out_img = sliding_window_search(binary_warped, plot)
        # Fit a second order polynomial to each line
        left_line.fit(leftx, lefty)
        right_line.fit(rightx, righty)
        if sanity_check(left_line, right_line) is False:
            for line in left_line, right_line:
                line.pop() # pop last result
                line.update(False, None) # indicate we did not find the lanes
            cv2.putText(undist, "Sliding window search failed", (200, 200), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
            # continue with lane lines found in previous frames
        else:
            cv2.putText(undist, "Sliding window search succeeded", (200, 200), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
    # s = "previous results: {} {}".format(len(left_line.recent_fitted), len(right_line.recent_fitted))
    # cv2.putText(undist, s, (400, 400), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
    if left_line.best_fit is None or right_line.best_fit is None:
        # nothing usable to draw: return the (annotated) undistorted frame
        if plot:
            ax2.imshow(out_img)
            ax3.imshow(out_img)
        return cv2.cvtColor(undist, cv2.COLOR_BGR2RGB)
    # Generate x and y values for plotting (np.linspace default: 50 samples)
    ploty = np.linspace(30, binary_warped.shape[0])
    left_fitx = left_line.evaluate(ploty)
    right_fitx = right_line.evaluate(ploty)
    if plot:
        ax2.imshow(out_img)
        ax2.plot(left_fitx, ploty, color='yellow')
        ax2.plot(right_fitx, ploty, color='yellow')
        plt.xlim(0, binary_warped.shape[1])
        plt.ylim(binary_warped.shape[0], 0)
    # calculate radius of curvature
    # Define y-value where we want radius of curvature
    # I'll choose the y-value corresponding to the middle of the image
    y_eval = binary_warped.shape[0]/2
    left_curverad = left_line.get_curvature(y_eval)
    right_curverad = right_line.get_curvature(y_eval)
    if plot:
        lx_eval = left_line.evaluate(y_eval)
        rx_eval = right_line.evaluate(y_eval)
        ax2.annotate('Curvature:\n{}m'.format(int(left_curverad)), xy=(lx_eval, y_eval), xytext=(lx_eval+200, y_eval), color='w', arrowprops={'color': 'w'})
        ax2.annotate('Curvature:\n{}m'.format(int(right_curverad)), xy=(rx_eval, y_eval), xytext=(rx_eval-300, y_eval), color='w', arrowprops={'color': 'w'})
    # calculate deviation from centre
    y_eval = binary_warped.shape[0] # bottom of the image, where the head of the car is
    lx_eval = left_line.evaluate(y_eval)
    rx_eval = right_line.evaluate(y_eval)
    deviation = ((lx_eval+rx_eval)/2 - binary_warped.shape[1]/2)*Line.xm_per_pix
    if plot:
        ax2.text(binary_warped.shape[1]/2, binary_warped.shape[0]/3, 'deviation:\n{:.02}m'.format(deviation), color='w')
    # Project lane lines back onto camera view
    # Create an image to draw the lines on
    warp_zero = np.zeros_like(binary_warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
    # Warp the blank back to original image space using the inverse perspective matrix
    newwarp = cv2.warpPerspective(color_warp, np.linalg.inv(M), (undist.shape[1], undist.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    # put curvature and deviation text on the image
    s = "Curvature {:4d} Deviation {:02.02}"\
        .format(int((left_curverad+right_curverad)/2), deviation)
    cv2.putText(result, s, (200, 100), cv2.FONT_HERSHEY_TRIPLEX, 1, (255, 255, 255))
    if plot:
        ax3.imshow(result)
    result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
    return result
# --- Run the full pipeline on every test image -----------------------------
test_images = glob.glob('test_images/test*.jpg')
for fname in test_images:
    # fresh line state per image so no history leaks between test frames
    left_line = Line()
    right_line = Line()
    print(fname)
    img = cv2.imread(fname)
    pipeline(img, plot=True)
    plt.show()
# --- Run the pipeline over the project video -------------------------------
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
white_output = 'test_videos_output/project_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
# clip1 = VideoFileClip("project_video.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
# NOTE(review): left_line/right_line still hold state from the test-image loop
# above; consider resetting them before processing a new video -- confirm.
white_clip = clip1.fl_image(pipeline) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
# --- Run the pipeline over the harder challenge video ----------------------
white_output = 'test_videos_output/harder_challenge_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("harder_challenge_video.mp4")
# NOTE(review): line state carried over from the previous video is reused
# here; consider re-initialising left_line/right_line first -- confirm.
white_clip = clip1.fl_image(pipeline) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))